#include <asm/sal.h>
#include <asm/hypervisor.h>
/* #include <asm-xen/evtchn.h> */
+#include <xen/interface/arch-ia64.h>
#include <linux/vmalloc.h>
shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
#include <asm/processor.h>
#include <asm/asmmacro.h>
+/* To clear vpsr.ic, vpsr.i needs to be cleared first */
+#define XEN_CLEAR_PSR_IC \
+ mov r14=1; \
+ movl r15=XSI_PSR_I_ADDR; \
+ movl r2=XSI_PSR_IC; \
+ ;; \
+ ld8 r15=[r15]; \
+ ld4 r3=[r2]; \
+ ;; \
+ ld1 r16=[r15]; \
+ ;; \
+ st1 [r15]=r14; \
+ st4 [r2]=r0; \
+ ;;
+
+/* First restore vpsr.ic, and then vpsr.i */
+#define XEN_RESTORE_PSR_IC \
+ st4 [r2]=r3; \
+ st1 [r15]=r16; \
+ ;;
+
GLOBAL_ENTRY(xen_get_ivr)
movl r8=running_on_xen;;
ld4 r8=[r8];;
(p7) mov r8=cr.ivr;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
- ;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_IVR
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
+ ;;
br.ret.sptk.many rp
;;
END(xen_get_ivr)
(p7) mov r8=cr.tpr;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
- ;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_TPR
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
+ ;;
br.ret.sptk.many rp
;;
END(xen_get_tpr)
(p7) mov cr.tpr=r32;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_SET_TPR
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
+ ;;
br.ret.sptk.many rp
;;
END(xen_set_tpr)
(p7) mov cr.eoi=r0;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_EOI
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
+ ;;
br.ret.sptk.many rp
;;
END(xen_eoi)
(p7) thash r8=r32;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_THASH
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
;;
(p7) mov cr.itm=r32;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_SET_ITM
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
;;
(p7) ptc.ga r32,r33;;
(p7) br.ret.sptk.many rp
;;
- movl r11=XSI_PSR_IC
mov r8=r32
mov r9=r33
;;
- ld8 r10=[r11]
- ;;
- st8 [r11]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_PTC_GA
;;
- st8 [r11]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
;;
(p7) mov r8=rr[r32];;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_RR
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
;;
(p7) mov rr[r32]=r33;;
(p7) br.ret.sptk.many rp
;;
- movl r11=XSI_PSR_IC
mov r8=r32
mov r9=r33
;;
- ld8 r10=[r11]
- ;;
- st8 [r11]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_SET_RR
;;
- st8 [r11]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
;;
(p7) mov ar7=r9
(p7) br.ret.sptk.many rp;;
-1: movl r11=XSI_PSR_IC
- mov r8=r32
+1: mov r8=r32
mov r9=r33
;;
- ld8 r10=[r11]
- ;;
- st8 [r11]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_SET_KR
;;
- st8 [r11]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_set_rr)
(p7) fc r32;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_FC
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_fc)
(p7) mov r8=cpuid[r32];;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_CPUID
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_get_cpuid)
(p7) mov r8=pmd[r32];;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_PMD
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_get_pmd)
(p7) mov r8=ar24;;
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_GET_EFLAG
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_get_eflag)
(p7) mov ar24=r32
(p7) br.ret.sptk.many rp
;;
- movl r9=XSI_PSR_IC
mov r8=r32
;;
- ld8 r10=[r9]
- ;;
- st8 [r9]=r0
+ XEN_CLEAR_PSR_IC
;;
XEN_HYPER_SET_EFLAG
;;
- st8 [r9]=r10
+ XEN_RESTORE_PSR_IC
;;
br.ret.sptk.many rp
END(xen_set_eflag)
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
#ifdef CONFIG_XEN
- movl r2=XSI_PSR_I
+ movl r2=XSI_PSR_I_ADDR
+ mov r18=1
;;
-(pUStk) st4 [r2]=r0
+ ld8 r2=[r2]
+ ;;
+(pUStk) st1 [r2]=r18
#else
(pUStk) rsm psr.i
#endif
;;
invala // M0|1 invalidate ALAT
#ifdef CONFIG_XEN
+ movl r28=XSI_PSR_I_ADDR
movl r29=XSI_PSR_IC
;;
- st8 [r29]=r0 // note: clears both vpsr.i and vpsr.ic!
+ ld8 r28=[r28]
+ mov r30=1
+ ;;
+ st1 [r28]=r30
+ st4 [r29]=r0 // note: clears both vpsr.i and vpsr.ic!
;;
#else
rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else
#ifdef CONFIG_XEN
-(pUStk) movl r17=XSI_PSR_I
- ;;
-(pUStk) st4 [r17]=r0
+(pUStk) movl r17=XSI_PSR_I_ADDR
+(pUStk) mov r31=1
+ ;;
+(pUStk) ld8 r17=[r17]
+ ;;
+(pUStk) st1 [r17]=r31
;;
#else
(pUStk) rsm psr.i
mov ar.ssd=r31
;;
#ifdef CONFIG_XEN
+ movl r23=XSI_PSR_I_ADDR
movl r22=XSI_PSR_IC
;;
- st8 [r22]=r0 // note: clears both vpsr.i and vpsr.ic!
+ ld8 r23=[r23]
+ mov r25=1
+ ;;
+ st1 [r23]=r25
+ st4 [r22]=r0 // note: clears both vpsr.i and vpsr.ic!
;;
#else
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
br.call.spnt.many rp=schedule
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
#ifdef CONFIG_XEN
- movl r2=XSI_PSR_I
+ movl r2=XSI_PSR_I_ADDR
+ mov r20=1
+ ;;
+ ld8 r2=[r2]
;;
- st4 [r2]=r0
+ st1 [r2]=r20
#else
rsm psr.i // disable interrupts
#endif
// Leaving this code inline above results in an IVT section overflow
// There is no particular reason for this code to be here...
xen_page_fault:
-(p15) movl r3=XSI_PSR_I
+(p15) movl r3=XSI_PSR_I_ADDR
;;
-(p15) st4 [r3]=r14,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
+(p15) ld8 r3=[r3]
+ ;;
+(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1
mov r14=r0
;;
(p15) ld4 r14=[r3] // if (pending_interrupts)
mov r16=1
;;
#if 1
- st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1
+ st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC // vpsr.ic = 1
+ ;;
+(p15) ld8 r3=[r3]
;;
-(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
+(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1
mov r16=r0
;;
(p15) ld4 r16=[r3] // if (pending_interrupts)
(p6) ssm psr.i // do a real ssm psr.i
;;
#else
-// st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC // vpsr.ic = 1
- adds r3=XSI_PSR_I-XSI_PSR_IC,r3 // SKIP vpsr.ic = 1
+// st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC // vpsr.ic = 1
+ adds r3=XSI_PSR_I_ADDR-XSI_PSR_IC,r3 // SKIP vpsr.ic = 1
+ ;;
+(p15) ld8 r3=[r3]
;;
-(p15) st4 [r3]=r16,XSI_PEND-XSI_PSR_I // if (p15) vpsr.i = 1
+(p15) st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR // if (p15) vpsr.i = 1
mov r16=r0
;;
(p15) ld4 r16=[r3] // if (pending_interrupts)
// from the idle loop so confuses privop counting
movl r31=XSI_PSR_IC
;;
-(p6) st8 [r31]=r0
+(p6) st4 [r31]=r0
;;
-(p7) adds r31=XSI_PSR_I-XSI_PSR_IC,r31
+(p7) adds r31=XSI_PSR_I_ADDR-XSI_PSR_IC,r31
+(p7) mov r22=1
;;
-(p7) st4 [r31]=r0
+(p7) ld8 r31=[r31]
+ ;;
+(p7) st1 [r31]=r22
;;
mov r31 = in3
mov b7 = loc2
* Others, like "pend", are abstractions based on privileged registers.
* "Pend" is guaranteed to be set if reading cr.ivr would return a
* (non-spurious) interrupt. */
-#define xen_get_virtual_psr_i() (*(int *)(XSI_PSR_I))
-#define xen_set_virtual_psr_i(_val) ({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
-#define xen_set_virtual_psr_ic(_val) ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
+#define XSI_PSR_I \
+ (*(uint64_t *)(XSI_PSR_I_ADDR))
+#define xen_get_virtual_psr_i() \
+ (!(*(uint8_t *)(XSI_PSR_I)))
+#define xen_set_virtual_psr_i(_val) \
+ ({ *(uint8_t *)(XSI_PSR_I) = (uint8_t)(_val) ? 0:1; })
+#define xen_set_virtual_psr_ic(_val) \
+ ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
#define xen_get_virtual_pend() (*(int *)(XSI_PEND))
/* Hyperprivops are "break" instructions with a well-defined API.
/* First is shared info page, and then arch specific vcpu context */
DEFINE(XSI_BASE, SHAREDINFO_ADDR);
- DEFINE(XSI_PSR_I_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_delivery_enabled)));
- DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+ DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
+ DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr)));
DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
-
}
regs->ar_rsc |= (2 << 2); /* force PL2/3 */
VCPU(v, banknum) = 1;
VCPU(v, metaphysical_mode) = 1;
+ VCPU(v, interrupt_mask_addr) =
+ (uint64_t)SHAREDINFO_ADDR + INT_ENABLE_OFFSET(v);
}
}
// r16 == cr.isr
// r17 == cr.iim
// r18 == XSI_PSR_IC_OFS
-// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r19 == vpsr.ic
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
#ifndef FAST_HYPERPRIVOPS // see beginning of file
// r16 == cr.isr
// r17 == cr.iim
// r18 == XSI_PSR_IC
-// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r19 == vpsr.ic
// r31 == pr
ENTRY(hyper_ssm_i)
#ifndef FAST_SSM_I
movl r27=~(IA64_PSR_BE|IA64_PSR_BN);;
or r30=r30,r28;;
and r30=r30,r27;;
+ mov r20=1
+ adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r22=[r22]
st8 [r21]=r30 ;;
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
- st8 [r18]=r0;;
+ st1 [r22]=r20;;
+ st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
// set shared_mem ifs and incomplete_regframe to 0
cover ;;
cmp.eq p6,p0=r16,r0;;
(p6) br.cond.spnt.few fast_tick_reflect_done;;
// if guest vpsr.i is off, we're done
- adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld4 r21=[r21];;
- cmp.eq p6,p0=r21,r0
+ adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r21=[r21];;
+ ld1 r21=[r21];;
+ cmp.eq p0,p6=r21,r0
(p6) br.cond.spnt.few fast_tick_reflect_done;;
// OK, we have a clock tick to deliver to the active domain!
dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
or r17=r17,r28;;
and r17=r17,r27;;
- ld4 r16=[r18],4;;
+ ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;;
cmp.ne p6,p0=r16,r0;;
+ ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS
(p6) dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
- ld4 r16=[r18],-4;;
- cmp.ne p6,p0=r16,r0;;
+ ld1 r16=[r16];;
+ cmp.eq p6,p0=r16,r0;;
(p6) dep r17=-1,r17,IA64_PSR_I_BIT,1 ;;
+ mov r20=1
+ adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r22=[r22]
st8 [r21]=r17 ;;
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
- st8 [r18]=r0;;
+ st1 [r22]=r20;;
+ st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
// set shared_mem ifs and incomplete_regframe to 0
cover ;;
// r16 == cr.isr
// r17 == cr.iim
// r18 == XSI_PSR_IC
-// r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+// r19 == vpsr.ic
// r31 == pr
GLOBAL_ENTRY(fast_break_reflect)
#ifndef FAST_BREAK // see beginning of file
#endif
// save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r21]=r29;;
+ st8 [r21]=r29,XSI_ISR_OFS-XSI_IIP_OFS;;
// set shared_mem isr
- adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r16 ;;
// set cr.ipsr
+ adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
mov r29=r30 ;;
+ ld8 r21=[r21]
movl r28=DELIVER_PSR_SET;;
movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
or r29=r29,r28;;
or r30=r30,r28;;
and r30=r30,r27;;
// also set shared_mem ipsr.i and ipsr.ic appropriately
- ld8 r24=[r18];;
- extr.u r22=r24,32,32
+ ld1 r22=[r21]
+ ld4 r24=[r18];;
cmp4.eq p6,p7=r24,r0;;
(p6) dep r30=0,r30,IA64_PSR_IC_BIT,1
(p7) dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;;
- cmp4.eq p6,p7=r22,r0;;
+ mov r24=r21
+ cmp.ne p6,p7=r22,r0;;
(p6) dep r30=0,r30,IA64_PSR_I_BIT,1
(p7) dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
+ mov r22=1
adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r30 ;;
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
- st8 [r18]=r0;;
+ st1 [r24]=r22
+ st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
// set shared_mem ifs and incomplete_regframe to 0
cover ;;
st8 [r21]=r0 ;;
adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r24 ;;
- // vpsr.i = vpsr.ic = 0 on delivery of interruption
- st8 [r18]=r0;;
// FIXME: need to save iipa and isr to be arch-compliant
// set iip to go to domain IVA break instruction vector
movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
cmp.eq p7,p0=r21,r0
(p7) br.spnt.few dispatch_reflection ;;
movl r18=XSI_PSR_IC;;
- ld8 r21=[r18];;
+ ld4 r21=[r18];;
cmp.eq p7,p0=r0,r21
(p7) br.spnt.few dispatch_reflection ;;
// set shared_mem ifa, FIXME: should we validate it?
dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
mov cr.ifs=r20 ;;
// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
+ adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
- mov r19=r0 ;;
+ ld8 r20=[r20]
+ mov r19=1
extr.u r23=r21,IA64_PSR_I_BIT,1 ;;
cmp.ne p7,p6=r23,r0 ;;
// not done yet
-(p7) dep r19=-1,r19,32,1
+(p7) st1 [r20]=r0
+(p6) st1 [r20]=r19;;
extr.u r23=r21,IA64_PSR_IC_BIT,1 ;;
cmp.ne p7,p6=r23,r0 ;;
-(p7) dep r19=-1,r19,0,1 ;;
- st8 [r18]=r19 ;;
+(p7) st4 [r18]=r19;;
+(p6) st4 [r18]=r0;;
// force on psr.ic, i, dt, rt, it, bn
movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
;;
extr.u r20=r21,41,2 ;; // get v(!)psr.ri
dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r16 ;;
+ st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;;
// set cr.ipsr (make sure cpl==2!)
mov r29=r17 ;;
movl r28=DELIVER_PSR_SET;;
+ mov r20=1
+ ld8 r22=[r22]
movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
or r29=r29,r28;;
and r29=r29,r27;;
// v.ipsr and v.iip are already set (and v.iip validated) as rfi target
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
- st8 [r18]=r0;;
+ st1 [r22]=r20
+ st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
// set shared_mem ifs and incomplete_regframe to 0
#if 0
#endif
movl r18=XSI_PSR_IC
;;
- ld8 r19=[r18]
+ ld4 r19=[r18]
;;
cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
(p7) br.spnt.many dispatch_privop_fault
#ifdef CONFIG_SMP
#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
#endif
- regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
+ regs->r31 = XSI_IPSR;
- PSCB(v,interrupt_delivery_enabled) = 0;
+ v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v,interrupt_collection_enabled) = 0;
inc_slow_reflect_count(vector);
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
imm = *(struct ia64_psr *)&imm24;
// interrupt flag
- if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
+ if (imm.i)
+ vcpu->vcpu_info->evtchn_upcall_mask = 1;
if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0;
// interrupt collection flag
//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
{
- PSCB(vcpu,interrupt_delivery_enabled) = 1;
+ vcpu->vcpu_info->evtchn_upcall_mask = 0;
PSCB(vcpu,interrupt_collection_enabled) = 1;
return IA64_NO_FAULT;
}
}
if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
if (imm.i) {
- if (!PSCB(vcpu,interrupt_delivery_enabled)) {
+ if (vcpu->vcpu_info->evtchn_upcall_mask) {
//printf("vcpu_set_psr_sm: psr.ic 0->1 ");
enabling_interrupts = 1;
}
- PSCB(vcpu,interrupt_delivery_enabled) = 1;
+ vcpu->vcpu_info->evtchn_upcall_mask = 0;
}
if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
// TODO: do this faster
if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
if (newpsr.i) {
- if (!PSCB(vcpu,interrupt_delivery_enabled))
+ if (vcpu->vcpu_info->evtchn_upcall_mask)
enabling_interrupts = 1;
- PSCB(vcpu,interrupt_delivery_enabled) = 1;
+ vcpu->vcpu_info->evtchn_upcall_mask = 0;
}
if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
if (newpsr.cpl == 2) newpsr.cpl = 0;
- if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
+ if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
else newpsr.i = 0;
if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
else newpsr.ic = 0;
BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
{
- return !!PSCB(vcpu,interrupt_delivery_enabled);
+ return !vcpu->vcpu_info->evtchn_upcall_mask;
}
UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
- psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
+ psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
psr.ia64_psr.bn = PSCB(vcpu,banknum);
psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
bits &= ~(1L << bitnum);
*p = bits;
/* clearing an eoi bit may unmask another pending interrupt... */
- if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
+ if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
// worry about this later... Linux only calls eoi
// with interrupts disabled
printf("Trying to EOI interrupt with interrupts enabled\n");
psr.i64 = PSCB(vcpu,ipsr);
if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
- if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
int_enable = psr.ia64_psr.i;
if (psr.ia64_psr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1;
if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
}
PSCB(vcpu,interrupt_collection_enabled) = 1;
vcpu_bsw1(vcpu);
- PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
+ vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
return (IA64_NO_FAULT);
}
if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
regs->cr_iip /*,
- VCPU(current,interrupt_delivery_enabled),
+ !current->vcpu_info->evtchn_upcall_mask,
VCPU(current,pending_interruption) */);
count = 0;
}
#define xen_vastart arch.xen_vastart
#define xen_vaend arch.xen_vaend
#define shared_info_va arch.shared_info_va
+#define INT_ENABLE_OFFSET(v) \
+ (sizeof(vcpu_info_t) * (v)->vcpu_id + \
+ offsetof(vcpu_info_t, evtchn_upcall_mask))
struct arch_vcpu {
#if 1
unsigned long precover_ifs;
unsigned long unat; // not sure if this is needed until NaT arch is done
int interrupt_collection_enabled; // virtual psr.ic
- int interrupt_delivery_enabled; // virtual psr.i
+ /* virtual interrupt deliverable flag is evtchn_upcall_mask in
+ * shared info area now. interrupt_mask_addr is the address
+ * of evtchn_upcall_mask for current vcpu
+ */
+ unsigned long interrupt_mask_addr;
int pending_interruption;
int incomplete_regframe; // see SDM vol2 6.8
unsigned long reserved5_1[4];